PAGE_SHIFT] = INVALID_M2P_ENTRY;
ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
- l1e_create_pfn(page_to_pfn(virt_to_page(gdt_table)),
- PAGE_HYPERVISOR);
-
+ l1e_create_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
+
ed->arch.guest_vtable = __linear_l2_table;
ed->arch.shadow_vtable = __shadow_linear_l2_table;
ed->arch.perdomain_ptes =
d->arch.mm_perdomain_pt + (ed->vcpu_id << PDPT_VCPU_SHIFT);
ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
- l1e_create_pfn(page_to_pfn(virt_to_page(gdt_table)),
- PAGE_HYPERVISOR);
+ l1e_create_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
}
#ifdef CONFIG_VMX
if ( p == 0 )
panic("Not enough memory for frame table\n");
map_pages_to_xen(
- FRAMETABLE_VIRT_START + i, p, 4UL << 20, PAGE_HYPERVISOR);
+ FRAMETABLE_VIRT_START + i,
+ p >> PAGE_SHIFT,
+ 4UL << (20-PAGE_SHIFT),
+ PAGE_HYPERVISOR);
}
memset(frame_table, 0, frame_table_size);
free_xenheap_page((unsigned long)d->arch.ptwr[PTWR_PT_INACTIVE].page);
}
-/* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */
int map_pages_to_xen(
- unsigned long v,
- unsigned long p,
- unsigned long s,
+ unsigned long virt,
+ unsigned long pfn,
+ unsigned long nr_pfns,
unsigned long flags)
{
l2_pgentry_t *pl2e, ol2e;
- l1_pgentry_t *pl1e;
+ l1_pgentry_t *pl1e, ol1e;
unsigned int i;
unsigned int map_small_pages = !!(flags & MAP_SMALL_PAGES);
flags &= ~MAP_SMALL_PAGES;
- while ( s != 0 )
+ while ( nr_pfns != 0 )
{
- pl2e = virt_to_xen_l2e(v);
+ pl2e = virt_to_xen_l2e(virt);
- if ( (((v|p) & ((1 << L2_PAGETABLE_SHIFT) - 1)) == 0) &&
- (s >= (1 << L2_PAGETABLE_SHIFT)) &&
+ if ( ((((virt>>PAGE_SHIFT) | pfn) & ((1<<PAGETABLE_ORDER)-1)) == 0) &&
+ (nr_pfns >= (1<<PAGETABLE_ORDER)) &&
!map_small_pages )
{
/* Super-page mapping. */
ol2e = *pl2e;
- *pl2e = l2e_create_phys(p, flags|_PAGE_PSE);
+ *pl2e = l2e_create_pfn(pfn, flags|_PAGE_PSE);
if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
{
free_xen_pagetable(l2e_get_page(*pl2e));
}
- v += 1 << L2_PAGETABLE_SHIFT;
- p += 1 << L2_PAGETABLE_SHIFT;
- s -= 1 << L2_PAGETABLE_SHIFT;
+ virt += 1UL << L2_PAGETABLE_SHIFT;
+ pfn += 1UL << PAGETABLE_ORDER;
+ nr_pfns -= 1UL << PAGETABLE_ORDER;
}
else
{
local_flush_tlb_pge();
}
- pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
- if ( (l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
- local_flush_tlb_one(v);
- *pl1e = l1e_create_phys(p, flags);
+ pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
+ ol1e = *pl1e;
+ *pl1e = l1e_create_pfn(pfn, flags);
+ if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
+ local_flush_tlb_one(virt);
- v += 1 << L1_PAGETABLE_SHIFT;
- p += 1 << L1_PAGETABLE_SHIFT;
- s -= 1 << L1_PAGETABLE_SHIFT;
+ virt += 1UL << L1_PAGETABLE_SHIFT;
+ pfn += 1UL;
+ nr_pfns -= 1UL;
}
}
return 0;
}
+void __set_fixmap(
+ enum fixed_addresses idx, unsigned long p, unsigned long flags)
+{
+ if ( unlikely(idx >= __end_of_fixed_addresses) )
+ BUG();
+ map_pages_to_xen(fix_to_virt(idx), p >> PAGE_SHIFT, 1, flags);
+}
+
#ifdef MEMORY_GUARD
void memguard_init(void)
{
    /*
     * Pre-map the whole Xen heap with 4kB mappings (MAP_SMALL_PAGES) so
     * that memguard_guard_range() can later toggle the presence of
     * individual pages without having to shatter super-pages.
     */
    unsigned long nr_heap_pfns = xenheap_phys_end >> PAGE_SHIFT;

    map_pages_to_xen(
        PAGE_OFFSET, 0, nr_heap_pfns, __PAGE_HYPERVISOR | MAP_SMALL_PAGES);
}
static void __memguard_change_range(void *p, unsigned long l, int guard)
if ( guard )
flags &= ~_PAGE_PRESENT;
- map_pages_to_xen((unsigned long)(_p), __pa(_p), _l, flags);
+ map_pages_to_xen(
+ _p, virt_to_phys(p) >> PAGE_SHIFT, _l >> PAGE_SHIFT, flags);
}
void memguard_guard_range(void *p, unsigned long l)
/* Map default GDT into their final position in the idle page table. */
map_pages_to_xen(
GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE,
- virt_to_phys(gdt_table), PAGE_SIZE, PAGE_HYPERVISOR);
+ virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);
/* Process CPU type information. */
identify_cpu(&boot_cpu_data);
* due to cache-attribute mismatches (e.g., AMD/AGP Linux bug).
*/
{
- unsigned long start = (unsigned long)e820.map[i].addr;
- unsigned long size = (unsigned long)e820.map[i].size;
- size = (size + (start & ~PAGE_MASK) + PAGE_SIZE - 1) & PAGE_MASK;
- if ( (start &= PAGE_MASK) < (64UL << 20) )
- {
- if ( (signed long)(size -= (64UL << 20) - start) <= 0 )
- continue;
- start = 64UL << 20;
- }
+ /* Calculate page-frame range, discarding partial frames. */
+ unsigned long start, end;
+ start = (e820.map[i].addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
+ /* Clip the range to above 64MB. */
+ if ( end < (64UL << (20-PAGE_SHIFT)) )
+ continue;
+ if ( start < (64UL << (20-PAGE_SHIFT)) )
+ start = 64UL << (20-PAGE_SHIFT);
+ /* Request the mapping. */
map_pages_to_xen(
- PAGE_OFFSET + start, start, size, PAGE_HYPERVISOR);
+ PAGE_OFFSET + (start << PAGE_SHIFT),
+ start, end-start, PAGE_HYPERVISOR);
}
#endif
}
memset(l1, 0, PAGE_SIZE);
unmap_domain_mem_with_cache(l1, l1cache);
- l2e = l2e_create_pfn(page_to_pfn(l1page), __PAGE_HYPERVISOR);
+ l2e = l2e_create_page(l1page, __PAGE_HYPERVISOR);
l2[l2_table_offset(va)] = l2e;
}
unmap_domain_mem_with_cache(l2, l2cache);
return &idle_pg_table[l2_table_offset(v)];
}
-void __set_fixmap(
- enum fixed_addresses idx, unsigned long p, unsigned long flags)
-{
- if ( unlikely(idx >= __end_of_fixed_addresses) )
- BUG();
- map_pages_to_xen(fix_to_virt(idx), p, PAGE_SIZE, flags);
-}
-
void __init paging_init(void)
{
void *ioremap_pt;
return pl2e;
}
-void __set_fixmap(
- enum fixed_addresses idx, unsigned long p, unsigned long flags)
-{
- if ( unlikely(idx >= __end_of_fixed_addresses) )
- BUG();
- map_pages_to_xen(fix_to_virt(idx), p, PAGE_SIZE, flags);
-}
-
void __init paging_init(void)
{
- unsigned long i, p;
+ unsigned long i;
l3_pgentry_t *l3rw, *l3ro;
struct pfn_info *pg;
NULL, L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT);
if ( pg == NULL )
panic("Not enough memory for m2p table\n");
- p = page_to_phys(pg);
map_pages_to_xen(
- RDWR_MPT_VIRT_START + i*8, p,
- 1UL << L2_PAGETABLE_SHIFT, PAGE_HYPERVISOR | _PAGE_USER);
+ RDWR_MPT_VIRT_START + i*8, page_to_pfn(pg),
+ 1UL << (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT),
+ PAGE_HYPERVISOR | _PAGE_USER);
memset((void *)(RDWR_MPT_VIRT_START + i*8), 0x55,
1UL << L2_PAGETABLE_SHIFT);
}
/* Look up the pfn_info for the frame referenced by a pagetable entry. */
#define l3e_get_page(_e) (pfn_to_page(l3e_get_pfn(_e)))
#define l4e_get_page(_e) (pfn_to_page(l4e_get_pfn(_e)))

/* Construct a pagetable entry mapping the given page frame with @_f flags. */
#define l1e_create_page(_pg,_f) (l1e_create_pfn(page_to_pfn(_pg),(_f)))
#define l2e_create_page(_pg,_f) (l2e_create_pfn(page_to_pfn(_pg),(_f)))
#define l3e_create_page(_pg,_f) (l3e_create_pfn(page_to_pfn(_pg),(_f)))
#define l4e_create_page(_pg,_f) (l4e_create_pfn(page_to_pfn(_pg),(_f)))

/* High table entries are reserved by the hypervisor. */
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE     \
  (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
void free_xen_pagetable(struct pfn_info *pg);
l2_pgentry_t *virt_to_xen_l2e(unsigned long v);
/*
 * Map the machine-frame range [@pfn, @pfn + @nr_pfns) at virtual address
 * @virt in Xen's address space, with pagetable flags @flags.
 */
#define MAP_SMALL_PAGES (1UL<<16) /* don't use superpages for the mapping */
int map_pages_to_xen(
    unsigned long virt,
    unsigned long pfn,
    unsigned long nr_pfns,
    unsigned long flags);
#endif /* !__ASSEMBLY__ */